In [60]:
import numpy as np
from scipy.special import expit

Create NeuralNetwork Class: Multi-Layer Perceptron


In [61]:
# Define NN class
class NeuralNetwork(object):
    # Initialize NN
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Nodes
        self.in_nodes = input_nodes
        self.hn_nodes = hidden_nodes
        self.ot_nodes = output_nodes
        # Learning rate
        self.learning_rate = learning_rate
        
        # Weight matrices
        ## Weights from the input layer to the hidden layer
        ## np.random.normal(mean, standard_deviation, size)
        ## Standard deviation 1/sqrt(nodes) keeps the initial weighted sums small,
        ## so the sigmoid starts in its responsive (non-saturated) range
        self.wih = np.random.normal(0.0, pow(self.hn_nodes, -0.5), (self.hn_nodes, self.in_nodes))
        ## Weights from the hidden layer to the output layer
        self.who = np.random.normal(0.0, pow(self.ot_nodes, -0.5), (self.ot_nodes, self.hn_nodes))
        
        # Activation function: sigmoid
        self.activation_function = lambda x: expit(x)
        
    # Training logic
    def train(self, input_list, target_list):
        # FeedForward
        ## Convert the lists to 2-D ndarrays and transpose them into column vectors
        inputs = np.array(input_list, ndmin=2).T
        targets = np.array(target_list, ndmin=2).T
        
        ## Weighted sum of the input signals into the hidden layer
        hidden_inputs = np.dot(self.wih, inputs)
        ## Apply the activation function to get the hidden layer outputs
        hidden_outputs = self.activation_function(hidden_inputs)
        ## Weighted sum of the hidden signals into the output layer
        output_inputs = np.dot(self.who, hidden_outputs)
        ## Apply the activation function to get the final outputs
        output_outputs = self.activation_function(output_inputs)
        
        # Back Propagation
        ## Output layer errors (target - actual)
        output_errors = targets - output_outputs
        ## Hidden layer errors: output errors split back across the connecting weights
        hidden_errors = np.dot(self.who.T, output_errors)
        
        ## Update weight between output and hidden layers
        self.who += self.learning_rate * np.dot((output_errors * output_outputs * (1-output_outputs)),
                                                np.transpose(hidden_outputs))
        ## Update weight between hidden and input layers
        self.wih += self.learning_rate * np.dot((hidden_errors * hidden_outputs * (1-hidden_outputs)),
                                                np.transpose(inputs))

    # Query the network: forward pass only, no weight updates
    def query(self, input_list):
        # Convert the list to a 2-D ndarray and transpose it into a column vector
        inputs = np.array(input_list, ndmin=2).T
        # Input to hidden layer
        hidden_inputs = np.dot(self.wih, inputs)
        # Outputs from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # Hidden to output layer
        output_inputs = np.dot(self.who, hidden_outputs)
        # Outputs from output layer
        output_outputs = self.activation_function(output_inputs)
        return output_outputs

In [62]:
# Set initial values
input_nodes = 3
hidden_nodes = 3
output_nodes = 3
learning_rate = 0.3

n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

In [63]:
# Check the class
n.query([1.2, -0.5, 3.7])


Out[63]:
array([[ 0.56395742],
       [ 0.44300005],
       [ 0.54129046]])
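
With untrained, random weights every output sits close to 0.5, the sigmoid's midpoint. As a quick sanity check (a sketch, not part of the original run), the weight matrices and the query result have the expected shapes:

# Sketch: inspect the shapes produced by the 3-3-3 network above
print(n.wih.shape)                      # (3, 3): hidden_nodes x input_nodes
print(n.who.shape)                      # (3, 3): output_nodes x hidden_nodes
print(n.query([1.2, -0.5, 3.7]).shape)  # (3, 1): one row per output node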

NeuralNetwork with MNIST dataset

  • Explore dataset

In [64]:
# Load Training dataset
with open('./dataframe/[HYStudy 23th] mnist_train_100.csv', 'r') as f:
    train_list = f.readlines()

In [65]:
# First record: label followed by 28*28 pixel values
train_list[0]


Out[65]:
'5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,18,18,18,126,136,175,26,166,255,247,127,0,0,0,0,0,0,0,0,0,0,0,0,30,36,94,154,170,253,253,253,253,253,225,172,253,242,195,64,0,0,0,0,0,0,0,0,0,0,0,49,238,253,253,253,253,253,253,253,253,251,93,82,82,56,39,0,0,0,0,0,0,0,0,0,0,0,0,18,219,253,253,253,253,253,198,182,247,241,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,80,156,107,253,253,205,11,0,43,154,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,14,1,154,253,90,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,139,253,190,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,11,190,253,70,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,35,241,225,160,108,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,81,240,253,253,119,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,45,186,253,253,150,27,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,93,252,253,187,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,249,253,249,64,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,46,130,183,253,253,207,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,39,148,229,253,253,253,250,182,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,24,114,221,253,253,253,253,201,78,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,23,66,213,253,253,253,253,198,81,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,18,171,219,253,253,253,253,195,80,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,55,172,226,253,253,253,253,244,133,11,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,136,253,253,253,212,135,132,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n'
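
Each record is a comma-separated string: the first field is the label, followed by 28*28 = 784 pixel intensities. A minimal check (sketch):

# Sketch: split the first record and count its fields
vals = train_list[0].split(',')
print(vals[0])         # label of the first record: '5'
print(len(vals) - 1)   # 784 pixel values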

In [66]:
# Import library to display data
import matplotlib.pyplot as plt  # to use imshow and subplots

In [67]:
# Display data
## Create Subplots
fig, axes = plt.subplots(2, 5, subplot_kw={"xticks": [], "yticks": []})

# axes.flat iterates over the subplot axes as a flat 1-D sequence
for ax, i in zip(axes.flat, range(0, 10)):
    all_values = train_list[i].split(',')
    # Strings to float; all_values[0] is the label, the rest reshape to a 28*28 matrix
    image_array = np.asfarray(all_values[1:]).reshape(28, 28)
    ax.imshow(image_array, cmap="Greys")
    ax.set_title(i)
fig.subplots_adjust(hspace=-0.3)
plt.show()



In [68]:
# Display "1"(with grid)
all_values = train_list[3].split(',')
# String to float, all_values[0] = label, transform to 28*28 matrix
image_array = np.asfarray(all_values[1:]).reshape(28, 28)

# Draw image
plt.imshow(image_array, cmap="Greys")
plt.grid(True)
plt.title("Label {}".format(all_values[0]))
plt.show()

train_list[3]


Out[68]:
'1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,124,253,255,63,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,96,244,251,253,62,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,127,251,251,253,62,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,68,236,251,211,31,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,60,228,251,251,94,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,155,253,253,189,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,20,253,251,235,66,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,205,253,251,126,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,104,251,253,184,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,80,240,251,193,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,253,253,253,159,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,151,251,251,251,39,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,48,221,251,251,172,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,234,251,251,196,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,253,251,251,89,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,159,255,253,253,31,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,48,228,253,247,140,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,64,251,253,220,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,64,251,253,220,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,24,193,253,220,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n'

In [69]:
# Normalize the grayscale pixel values to the range 0.01 - 1.00
# The +0.01 offset avoids zero-valued inputs,
# which would zero out the corresponding weight updates during training
norm_input = (np.asfarray(all_values[1:]) / 255 * 0.99) + 0.01
print(norm_input)


[ 0.01        0.01        0.01        0.01        0.01        0.01        0.01
  ...
  0.01        0.01        0.01        0.01        0.01        0.01        0.01      ]
(784 normalized pixel values; background pixels map to 0.01, the strongest pen strokes to 1.0)
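
The same rescaling can be wrapped in a small helper; a minimal sketch (the helper name is ours, not part of the notebook):

# Sketch: reusable rescaling from raw pixel values in [0, 255] to [0.01, 1.00]
def scale_pixels(values):
    return (np.asfarray(values) / 255.0 * 0.99) + 0.01

scaled = scale_pixels(all_values[1:])
print(scaled.min(), scaled.max())   # 0.01 and 1.0 for this record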

In [70]:
# Create the output label (target) array to feed into the NN
# Values must lie strictly between 0 and 1, since the sigmoid never reaches exactly 0 or 1
out_nodes = 10

# Target value(label)
# 1 -> 0.99, 0 -> 0.01 to satisfy constraint
targets = np.zeros(out_nodes) + 0.01
# all_values[0] = label
targets[int(all_values[0])] = 0.99

# label = 1(index[1] = 0.99)
targets


Out[70]:
array([ 0.01,  0.99,  0.01,  0.01,  0.01,  0.01,  0.01,  0.01,  0.01,  0.01])
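
Reading the encoding back, np.argmax recovers the label (a quick sketch):

# Sketch: argmax over the target vector returns the encoded label
print(np.argmax(targets))   # 1, matching all_values[0]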

Train the NN


In [100]:
# Define network structure
input_nodes = 784  # Data = 28*28

# The number of hidden nodes is chosen heuristically:
# smaller than the number of input nodes, so the network has to compress the input into core features,
# but comfortably larger than the number of output nodes
hidden_nodes = 100

# The number of output nodes: same as the number of labels
output_nodes = 10

# Learning rate
learning_rate = 0.3

# Create instance of NN
n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

In [121]:
# Train the NN
for record in train_list:
    # Split the CSV data on the separator ','
    all_values = record.split(',')
    # Normalize input values
    inputs = (np.asfarray(all_values[1:]) / 255 * 0.99) + 0.01
    # Create target value
    targets = np.zeros(output_nodes) + 0.01
    targets[int(all_values[0])] = 0.99
    n.train(inputs, targets)
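
With only 100 training records, the fit usually improves if the pass over train_list is repeated; a hedged sketch (not executed in this notebook) that adds an epoch loop, with the epoch count chosen arbitrarily:

# Sketch: repeat the training pass for several epochs (epoch count is an assumption)
epochs = 5
for _ in range(epochs):
    for record in train_list:
        all_values = record.split(',')
        inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        targets = np.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)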

Test the NN


In [73]:
# Load test dataset
with open('./dataframe/[HYStudy 23th] mnist_test_10.csv', 'r') as f:
    test_list = f.readlines()

In [74]:
# Test 1st data
test_values = test_list[0].split(',')
print("Correct answer is", test_values[0])


Correct answer is 7

In [122]:
# Send a query with normalized values
ans = n.query((np.asfarray(test_values[1:]) / 255.0 * 0.99) + 0.01)
print(ans)
print("Maximum value is {} and index is {}".format(max(ans), np.argmax(ans)))
print("Predicted label is", np.argmax(ans))


[[ 0.01994044]
 [ 0.00331181]
 [ 0.02346481]
 [ 0.00552693]
 [ 0.00213431]
 [ 0.02977635]
 [ 0.00120625]
 [ 0.97150379]
 [ 0.00624988]
 [ 0.02218302]]
Maximum value is [ 0.97150379] and index is 7
Predicted label is 7
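
The query above checks a single record; a hedged sketch (not part of the original run) that scores every record in the 10-sample test file the same way:

# Sketch: accuracy over the full test_list
scorecard = []
for record in test_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    outputs = n.query((np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01)
    scorecard.append(1 if np.argmax(outputs) == correct_label else 0)
print("accuracy =", sum(scorecard) / len(scorecard))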